Clear all shadow caches when returning to real mode from protected mode.
authorkaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Thu, 15 Dec 2005 19:38:44 +0000 (20:38 +0100)
committerkaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Thu, 15 Dec 2005 19:38:44 +0000 (20:38 +0100)
This ensures that, if the OS modifies some page tables in real mode and
then returns to protected mode, no outdated shadow tables are used,
because the out-of-sync mechanism does not work in real mode.

Signed-off-by: Xiaofeng Ling <xiaofeng.ling@intel.com>
xen/arch/x86/shadow32.c
xen/arch/x86/shadow_public.c
xen/arch/x86/vmx.c
xen/include/asm-x86/shadow.h

index 7d517ec3903541c862aca83696364b4341b2958c..661aae27e7fceaa80243f3a18e0159ee65e163fd 100644 (file)
@@ -2982,6 +2982,23 @@ void __update_pagetables(struct vcpu *v)
     }
 }
 
+/*
+ * Discard every cached shadow translation for domain @d.
+ *
+ * Frees all shadow pages and shadow hash-table entries, then replaces
+ * the shadow_status hash table with a freshly zeroed allocation and
+ * drops any out-of-sync entries.  Used when the guest returns from
+ * real mode to protected mode, where the out-of-sync mechanism was
+ * inactive and cached shadows may be stale (see commit description).
+ *
+ * Crashes the domain if the new hash table cannot be allocated; the
+ * shadow lock is held across the whole operation.
+ */
+void clear_all_shadow_status(struct domain *d)
+{
+    shadow_lock(d);
+    free_shadow_pages(d);
+    free_shadow_ht_entries(d);
+    /* Rebuild an empty hash table so lookups start from a clean slate. */
+    d->arch.shadow_ht = 
+        xmalloc_array(struct shadow_status, shadow_ht_buckets);
+    if ( d->arch.shadow_ht == NULL ) {
+        printk("clear all shadow status:xmalloc fail\n");
+        domain_crash_synchronous();
+    }
+    memset(d->arch.shadow_ht, 0,
+           shadow_ht_buckets * sizeof(struct shadow_status));
+
+    free_out_of_sync_entries(d);
+    shadow_unlock(d);
+}
 
 /************************************************************************/
 /************************************************************************/
index ea220edde1fc3f888838012bd2f733e855b8adad..971b659c35125fd27dab43154fcbcbde81536d1c 100644 (file)
@@ -1747,6 +1747,24 @@ void shadow_sync_and_drop_references(
     shadow_unlock(d);
 }
 
+/*
+ * Discard every cached shadow translation for domain @d.
+ *
+ * Frees all shadow pages and shadow hash-table entries, then replaces
+ * the shadow_status hash table with a freshly zeroed allocation and
+ * drops any out-of-sync entries.  Used when the guest returns from
+ * real mode to protected mode, where the out-of-sync mechanism was
+ * inactive and cached shadows may be stale (see commit description).
+ *
+ * Crashes the domain if the new hash table cannot be allocated; the
+ * shadow lock is held across the whole operation.
+ */
+void clear_all_shadow_status(struct domain *d)
+{
+    shadow_lock(d);
+    free_shadow_pages(d);
+    free_shadow_ht_entries(d);
+    /* Rebuild an empty hash table so lookups start from a clean slate. */
+    d->arch.shadow_ht = 
+        xmalloc_array(struct shadow_status, shadow_ht_buckets);
+    if ( d->arch.shadow_ht == NULL ) {
+        printk("clear all shadow status:xmalloc fail\n");
+        domain_crash_synchronous();
+    }
+    memset(d->arch.shadow_ht, 0,
+           shadow_ht_buckets * sizeof(struct shadow_status));
+
+    free_out_of_sync_entries(d);
+    shadow_unlock(d);
+}
+
+
 /*
  * Local variables:
  * mode: C
index ff53fedea27981d7956206696cbf2db2ae70be17..e8e6f8d0b256d5dd5dc668835225e5c71ab3a1c5 100644 (file)
@@ -1223,6 +1223,7 @@ static int vmx_set_cr0(unsigned long value)
             }
         }
 
+        clear_all_shadow_status(v->domain);
         if (vmx_assist(v, VMX_ASSIST_INVOKE)) {
             set_bit(VMX_CPU_STATE_ASSIST_ENABLED, &v->arch.arch_vmx.cpu_state);
             __vmread(GUEST_RIP, &eip);
index b0c24dd89553f0a585ccbb473d45e8ce53e0cd5f..b9a80068d20ff9436f0b8a00c833eea8fc4a5f75 100644 (file)
@@ -1707,6 +1707,8 @@ static inline void update_pagetables(struct vcpu *v)
     }
 }
 
+void clear_all_shadow_status(struct domain *d);
+
 #if SHADOW_DEBUG
 extern int _check_pagetable(struct vcpu *v, char *s);
 extern int _check_all_pagetables(struct vcpu *v, char *s);